library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.1 ──
## ✓ ggplot2 3.3.5 ✓ purrr 0.3.4
## ✓ tibble 3.1.5 ✓ dplyr 1.0.7
## ✓ tidyr 1.1.4 ✓ stringr 1.4.0
## ✓ readr 2.0.2 ✓ forcats 0.5.1
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
library(janitor)
##
## Attaching package: 'janitor'
## The following objects are masked from 'package:stats':
##
## chisq.test, fisher.test
library(rstan)
## Loading required package: StanHeaders
## rstan (Version 2.21.3, GitRev: 2e1f913d3ca3)
## For execution on a local, multicore CPU with excess RAM we recommend calling
## options(mc.cores = parallel::detectCores()).
## To avoid recompilation of unchanged Stan programs, we recommend calling
## rstan_options(auto_write = TRUE)
##
## Attaching package: 'rstan'
## The following object is masked from 'package:tidyr':
##
## extract
library(bayesplot)
## This is bayesplot version 1.8.1
## - Online documentation and vignettes at mc-stan.org/bayesplot
## - bayesplot theme set to bayesplot::theme_default()
## * Does _not_ affect other ggplot2 plots
## * See ?bayesplot_theme_set for details on theme setting
# Embed a picture of hand-written work (code pattern shared by Yue)
# NOTE(review): absolute, user-specific path -- this will break when the
# document is knit on any other machine; consider a project-relative path
knitr::include_graphics("/Users/brendaonyango/Desktop/IMG_4318 copy.png")
If the chain is mixing too slowly it will have only explored a limited range in the first several thousand iterations. It will overestimate the plausibility of pi values in this range and underestimate the plausibility of values outside the range.
The chain having a high correlation will not estimate the correct peak, if there is one, in the posterior.
Chains that get stuck are overestimating in the values where they are stuck and will produce peaks in the posterior that are not there.
MCMC diagnostics are important because simulations are not perfect and diagnostics, used holistically, can identify how to improve a MCMC chain.
MCMC simulations are helpful because they are an efficient and flexible alternative to grid approximations. Grid approximations for more complex problems take a long time and grid approximations are not good for estimating posteriors when there are multiple dimensions. MCMCs can.
Rstan combines R with the Stan “engine”; Stan is written in C++ and is good for Bayesian modeling.
How to put Markov chains in applied language for social problems/questions.
We’re given Y|pi~Bin(n, pi) and pi~Beta(3, 8) and n = 10 with Y = 2.
# Step 1: define a grid of 5 candidate pi values spanning [0, 1]
grid_data <- data.frame(pi_grid = seq(0, 1, length.out = 5))

# Step 2: evaluate the Beta(3, 8) prior and the Bin(10, pi) likelihood
# of the observed Y = 2 at every grid value.
# NOTE: the column name "liklihood" is misspelled but kept as-is because
# later chunks refer to it by that exact name.
grid_data <- grid_data |>
  mutate(prior = dbeta(pi_grid, 3, 8)) |>
  mutate(liklihood = dbinom(2, 10, pi_grid))
Next is approximating the posterior using the product of the likelihood and prior and then normalizing them.
# Step 3: approximate the posterior as prior x likelihood, rescaled so
# the probabilities over the grid sum to 1
grid_data <- grid_data |>
  mutate(unnormalized = liklihood * prior) |>
  mutate(posterior = unnormalized / sum(unnormalized))

# Sanity check: the normalized posterior column should sum to exactly 1
# (the unnormalized column generally will not)
grid_data |>
  summarize(sum(unnormalized), sum(posterior))
## sum(unnormalized) sum(posterior)
## 1 0.8765603 1
Next we’ll examine the grid approximation posterior rounded to 2 decimal places.
#step 4: display the grid approximation, rounded for readability
round(grid_data, 2) #2 is the number of decimal places to keep
## pi_grid prior liklihood unnormalized posterior
## 1 0.00 0.00 0.00 0.00 0.00
## 2 0.25 3.00 0.28 0.85 0.96
## 3 0.50 0.70 0.04 0.03 0.04
## 4 0.75 0.01 0.00 0.00 0.00
## 5 1.00 0.00 0.00 0.00 0.00
Finally, we’ll plot this model.
# Plot the grid approximation of the posterior: one point per grid value,
# with a vertical segment from 0 up to its posterior probability
# (a lollipop-style plot of the discretized distribution)
ggplot(grid_data, aes(x = pi_grid, y = posterior)) +
geom_point() +
geom_segment(aes(x = pi_grid, xend = pi_grid, y = 0, yend = posterior))
# Part b: redo the grid approximation with a much finer grid (201 values).
# Step 1: define the grid. Object and column names deliberately differ
# from part a ("prir", "liklhood", "posterir") to keep the two runs
# distinct; the misspellings are kept because later chunks use them.
grd_data <- data.frame(p_grid = seq(0, 1, length.out = 201))

# Step 2: same Beta(3, 8) prior and Bin(10, pi) likelihood of the
# observed Y = 2 successes, evaluated on the finer grid
grd_data <- grd_data |>
  mutate(prir = dbeta(p_grid, 3, 8)) |>
  mutate(liklhood = dbinom(2, 10, p_grid))

# Step 3: normalize prior x likelihood into a posterior approximation
grd_data <- grd_data |>
  mutate(unnormalized = liklhood * prir) |>
  mutate(posterir = unnormalized / sum(unnormalized))

# Sanity check: the posterior approximation sums to 1
grd_data |>
  summarize(sum(unnormalized), sum(posterir))
## sum(unnormalized) sum(posterir)
## 1 41.79567 1
#step 4: display the fine-grid approximation, rounded to 2 decimal places
round(grd_data, 2)
## p_grid prir liklhood unnormalized posterir
## 1 0.00 0.00 0.00 0.00 0.00
## 2 0.00 0.01 0.00 0.00 0.00
## 3 0.01 0.03 0.00 0.00 0.00
## 4 0.01 0.07 0.01 0.00 0.00
## 5 0.02 0.13 0.02 0.00 0.00
## 6 0.03 0.19 0.02 0.00 0.00
## 7 0.03 0.26 0.03 0.01 0.00
## 8 0.04 0.34 0.04 0.01 0.00
## 9 0.04 0.43 0.05 0.02 0.00
## 10 0.04 0.53 0.06 0.03 0.00
## 11 0.05 0.63 0.07 0.05 0.00
## 12 0.06 0.73 0.09 0.06 0.00
## 13 0.06 0.84 0.10 0.08 0.00
## 14 0.06 0.95 0.11 0.11 0.00
## 15 0.07 1.06 0.12 0.13 0.00
## 16 0.07 1.17 0.14 0.16 0.00
## 17 0.08 1.29 0.15 0.19 0.00
## 18 0.09 1.40 0.16 0.22 0.01
## 19 0.09 1.51 0.17 0.26 0.01
## 20 0.10 1.62 0.18 0.30 0.01
## 21 0.10 1.72 0.19 0.33 0.01
## 22 0.10 1.83 0.20 0.37 0.01
## 23 0.11 1.93 0.21 0.41 0.01
## 24 0.12 2.02 0.22 0.45 0.01
## 25 0.12 2.12 0.23 0.49 0.01
## 26 0.12 2.21 0.24 0.53 0.01
## 27 0.13 2.30 0.25 0.57 0.01
## 28 0.14 2.38 0.26 0.61 0.01
## 29 0.14 2.45 0.26 0.65 0.02
## 30 0.14 2.53 0.27 0.68 0.02
## 31 0.15 2.60 0.28 0.72 0.02
## 32 0.16 2.66 0.28 0.75 0.02
## 33 0.16 2.72 0.29 0.78 0.02
## 34 0.16 2.77 0.29 0.80 0.02
## 35 0.17 2.82 0.29 0.83 0.02
## 36 0.18 2.87 0.30 0.85 0.02
## 37 0.18 2.91 0.30 0.87 0.02
## 38 0.18 2.94 0.30 0.88 0.02
## 39 0.19 2.97 0.30 0.89 0.02
## 40 0.20 3.00 0.30 0.90 0.02
## 41 0.20 3.02 0.30 0.91 0.02
## 42 0.21 3.04 0.30 0.92 0.02
## 43 0.21 3.05 0.30 0.92 0.02
## 44 0.22 3.06 0.30 0.92 0.02
## 45 0.22 3.06 0.30 0.91 0.02
## 46 0.22 3.06 0.30 0.91 0.02
## 47 0.23 3.06 0.29 0.90 0.02
## 48 0.24 3.05 0.29 0.89 0.02
## 49 0.24 3.04 0.29 0.88 0.02
## 50 0.24 3.02 0.29 0.86 0.02
## 51 0.25 3.00 0.28 0.85 0.02
## 52 0.26 2.98 0.28 0.83 0.02
## 53 0.26 2.96 0.27 0.81 0.02
## 54 0.26 2.93 0.27 0.79 0.02
## 55 0.27 2.90 0.26 0.77 0.02
## 56 0.28 2.87 0.26 0.74 0.02
## 57 0.28 2.83 0.25 0.72 0.02
## 58 0.29 2.79 0.25 0.70 0.02
## 59 0.29 2.75 0.24 0.67 0.02
## 60 0.30 2.71 0.24 0.65 0.02
## 61 0.30 2.67 0.23 0.62 0.01
## 62 0.30 2.62 0.23 0.60 0.01
## 63 0.31 2.58 0.22 0.57 0.01
## 64 0.32 2.53 0.22 0.55 0.01
## 65 0.32 2.48 0.21 0.52 0.01
## 66 0.32 2.43 0.20 0.50 0.01
## 67 0.33 2.38 0.20 0.47 0.01
## 68 0.34 2.32 0.19 0.45 0.01
## 69 0.34 2.27 0.19 0.43 0.01
## 70 0.35 2.22 0.18 0.40 0.01
## 71 0.35 2.16 0.18 0.38 0.01
## 72 0.36 2.11 0.17 0.36 0.01
## 73 0.36 2.05 0.16 0.34 0.01
## 74 0.36 2.00 0.16 0.32 0.01
## 75 0.37 1.94 0.15 0.30 0.01
## 76 0.38 1.89 0.15 0.28 0.01
## 77 0.38 1.83 0.14 0.26 0.01
## 78 0.38 1.78 0.14 0.24 0.01
## 79 0.39 1.72 0.13 0.23 0.01
## 80 0.40 1.67 0.13 0.21 0.01
## 81 0.40 1.61 0.12 0.19 0.00
## 82 0.41 1.56 0.12 0.18 0.00
## 83 0.41 1.51 0.11 0.17 0.00
## 84 0.42 1.45 0.11 0.15 0.00
## 85 0.42 1.40 0.10 0.14 0.00
## 86 0.42 1.35 0.10 0.13 0.00
## 87 0.43 1.30 0.09 0.12 0.00
## 88 0.44 1.25 0.09 0.11 0.00
## 89 0.44 1.20 0.08 0.10 0.00
## 90 0.44 1.16 0.08 0.09 0.00
## 91 0.45 1.11 0.08 0.08 0.00
## 92 0.46 1.06 0.07 0.08 0.00
## 93 0.46 1.02 0.07 0.07 0.00
## 94 0.47 0.98 0.07 0.06 0.00
## 95 0.47 0.93 0.06 0.06 0.00
## 96 0.48 0.89 0.06 0.05 0.00
## 97 0.48 0.85 0.06 0.05 0.00
## 98 0.48 0.81 0.05 0.04 0.00
## 99 0.49 0.78 0.05 0.04 0.00
## 100 0.50 0.74 0.05 0.03 0.00
## 101 0.50 0.70 0.04 0.03 0.00
## 102 0.50 0.67 0.04 0.03 0.00
## 103 0.51 0.64 0.04 0.02 0.00
## 104 0.52 0.60 0.04 0.02 0.00
## 105 0.52 0.57 0.03 0.02 0.00
## 106 0.52 0.54 0.03 0.02 0.00
## 107 0.53 0.51 0.03 0.02 0.00
## 108 0.54 0.48 0.03 0.01 0.00
## 109 0.54 0.46 0.03 0.01 0.00
## 110 0.54 0.43 0.02 0.01 0.00
## 111 0.55 0.41 0.02 0.01 0.00
## 112 0.56 0.38 0.02 0.01 0.00
## 113 0.56 0.36 0.02 0.01 0.00
## 114 0.57 0.34 0.02 0.01 0.00
## 115 0.57 0.32 0.02 0.01 0.00
## 116 0.58 0.30 0.02 0.00 0.00
## 117 0.58 0.28 0.01 0.00 0.00
## 118 0.58 0.26 0.01 0.00 0.00
## 119 0.59 0.24 0.01 0.00 0.00
## 120 0.60 0.23 0.01 0.00 0.00
## 121 0.60 0.21 0.01 0.00 0.00
## 122 0.60 0.20 0.01 0.00 0.00
## 123 0.61 0.18 0.01 0.00 0.00
## 124 0.62 0.17 0.01 0.00 0.00
## 125 0.62 0.16 0.01 0.00 0.00
## 126 0.62 0.15 0.01 0.00 0.00
## 127 0.63 0.14 0.01 0.00 0.00
## 128 0.64 0.13 0.01 0.00 0.00
## 129 0.64 0.12 0.01 0.00 0.00
## 130 0.64 0.11 0.00 0.00 0.00
## 131 0.65 0.10 0.00 0.00 0.00
## 132 0.66 0.09 0.00 0.00 0.00
## 133 0.66 0.08 0.00 0.00 0.00
## 134 0.66 0.08 0.00 0.00 0.00
## 135 0.67 0.07 0.00 0.00 0.00
## 136 0.68 0.06 0.00 0.00 0.00
## 137 0.68 0.06 0.00 0.00 0.00
## 138 0.69 0.05 0.00 0.00 0.00
## 139 0.69 0.05 0.00 0.00 0.00
## 140 0.70 0.04 0.00 0.00 0.00
## 141 0.70 0.04 0.00 0.00 0.00
## 142 0.70 0.03 0.00 0.00 0.00
## 143 0.71 0.03 0.00 0.00 0.00
## 144 0.72 0.03 0.00 0.00 0.00
## 145 0.72 0.03 0.00 0.00 0.00
## 146 0.72 0.02 0.00 0.00 0.00
## 147 0.73 0.02 0.00 0.00 0.00
## 148 0.74 0.02 0.00 0.00 0.00
## 149 0.74 0.02 0.00 0.00 0.00
## 150 0.74 0.01 0.00 0.00 0.00
## 151 0.75 0.01 0.00 0.00 0.00
## 152 0.76 0.01 0.00 0.00 0.00
## 153 0.76 0.01 0.00 0.00 0.00
## 154 0.76 0.01 0.00 0.00 0.00
## 155 0.77 0.01 0.00 0.00 0.00
## 156 0.78 0.01 0.00 0.00 0.00
## 157 0.78 0.01 0.00 0.00 0.00
## 158 0.78 0.00 0.00 0.00 0.00
## 159 0.79 0.00 0.00 0.00 0.00
## 160 0.80 0.00 0.00 0.00 0.00
## 161 0.80 0.00 0.00 0.00 0.00
## 162 0.80 0.00 0.00 0.00 0.00
## 163 0.81 0.00 0.00 0.00 0.00
## 164 0.82 0.00 0.00 0.00 0.00
## 165 0.82 0.00 0.00 0.00 0.00
## 166 0.83 0.00 0.00 0.00 0.00
## 167 0.83 0.00 0.00 0.00 0.00
## 168 0.84 0.00 0.00 0.00 0.00
## 169 0.84 0.00 0.00 0.00 0.00
## 170 0.84 0.00 0.00 0.00 0.00
## 171 0.85 0.00 0.00 0.00 0.00
## 172 0.86 0.00 0.00 0.00 0.00
## 173 0.86 0.00 0.00 0.00 0.00
## 174 0.86 0.00 0.00 0.00 0.00
## 175 0.87 0.00 0.00 0.00 0.00
## 176 0.88 0.00 0.00 0.00 0.00
## 177 0.88 0.00 0.00 0.00 0.00
## 178 0.88 0.00 0.00 0.00 0.00
## 179 0.89 0.00 0.00 0.00 0.00
## 180 0.90 0.00 0.00 0.00 0.00
## 181 0.90 0.00 0.00 0.00 0.00
## 182 0.90 0.00 0.00 0.00 0.00
## 183 0.91 0.00 0.00 0.00 0.00
## 184 0.92 0.00 0.00 0.00 0.00
## 185 0.92 0.00 0.00 0.00 0.00
## 186 0.92 0.00 0.00 0.00 0.00
## 187 0.93 0.00 0.00 0.00 0.00
## 188 0.94 0.00 0.00 0.00 0.00
## 189 0.94 0.00 0.00 0.00 0.00
## 190 0.95 0.00 0.00 0.00 0.00
## 191 0.95 0.00 0.00 0.00 0.00
## 192 0.96 0.00 0.00 0.00 0.00
## 193 0.96 0.00 0.00 0.00 0.00
## 194 0.96 0.00 0.00 0.00 0.00
## 195 0.97 0.00 0.00 0.00 0.00
## 196 0.98 0.00 0.00 0.00 0.00
## 197 0.98 0.00 0.00 0.00 0.00
## 198 0.98 0.00 0.00 0.00 0.00
## 199 0.99 0.00 0.00 0.00 0.00
## 200 1.00 0.00 0.00 0.00 0.00
## 201 1.00 0.00 0.00 0.00 0.00
Next I’ll plot this approximation.
# Lollipop plot of the 201-point grid approximation of the posterior
ggplot(grd_data, aes(x = p_grid, y = posterir)) +
geom_point() +
geom_segment(aes(x = p_grid, xend = p_grid, y = 0, yend = posterir))
We can see that b gives a much better estimate than a.
We’re given a Gamma-Poisson model for Y|lambda~Pois(lambda) and lambda~Gamma(20,5). N = 3 and (Y1, Y2, Y3) = (0, 1, 0).
# Step 1: define a coarse grid of 8 lambda values on [0, 8]
grid_data <- data.frame(lambda_grid = seq(0, 8, length.out = 8))

# Step 2: evaluate the Gamma(20, 5) prior and the joint Poisson
# likelihood of the independent observations (Y1, Y2, Y3) = (0, 1, 0)
# at each lambda
grid_data <- grid_data |>
  mutate(prior = dgamma(lambda_grid, 20, 5)) |>
  mutate(likelihood = dpois(0, lambda_grid) *
           dpois(1, lambda_grid) *
           dpois(0, lambda_grid))

# Step 3: normalize prior x likelihood into a discretized posterior
grid_data <- grid_data |>
  mutate(unnormalized = likelihood * prior) |>
  mutate(posterior = unnormalized / sum(unnormalized))
# Set the seed so the resampling step below is reproducible
set.seed(1500)
# Step 4: draw 10,000 lambda values from the discretized posterior by
# resampling grid rows with probability proportional to `posterior`
post_sample <- sample_n(grid_data, size = 10000,
weight = posterior, replace = TRUE)
# Histogram of the sampled lambda values on the density scale, with a
# Gamma(20, 5) curve overlaid.
# NOTE(review): the overlaid curve is the *prior*; the exact conjugate
# posterior here would be Gamma(20 + 1, 5 + 3) = Gamma(21, 8) -- confirm
# which comparison was intended.
ggplot(post_sample, aes(x = lambda_grid)) +
geom_histogram(aes(y = ..density..), color = "white") +
stat_function(fun = dgamma, args = list(20, 5)) +
lims(x = c(0, 8))
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## Warning: Removed 2 rows containing missing values (geom_bar).
# Part b: repeat with a fine grid of 201 lambda values on [0, 8]
grid_data <- data.frame(lambda_grid = seq(0, 8, length.out = 201))

# Step 2: Gamma(20, 5) prior and joint Poisson likelihood of the
# observations (0, 1, 0), as before, on the finer grid
grid_data <- grid_data |>
  mutate(prior = dgamma(lambda_grid, 20, 5)) |>
  mutate(likelihood = dpois(0, lambda_grid) *
           dpois(1, lambda_grid) *
           dpois(0, lambda_grid))

# Step 3: normalize into the posterior approximation
grid_data <- grid_data |>
  mutate(unnormalized = likelihood * prior) |>
  mutate(posterior = unnormalized / sum(unnormalized))
# Set the seed so the resampling step below is reproducible
set.seed(1500)
# Step 4: draw 10,000 lambda values from the (finer) discretized posterior
post_sample <- sample_n(grid_data, size = 10000,
weight = posterior, replace = TRUE)
# Histogram of the sampled lambda values with a Gamma(20, 5) curve.
# NOTE(review): as in part a, this overlays the prior rather than the
# exact Gamma(21, 8) posterior -- confirm intent.
ggplot(post_sample, aes(x = lambda_grid)) +
geom_histogram(aes(y = ..density..), color = "white") +
stat_function(fun = dgamma, args = list(20, 5)) +
lims(x = c(0, 8))
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## Warning: Removed 2 rows containing missing values (geom_bar).
## Exercise 6.7
We’re given a normal-normal model for mu with Y|mu~N(mu, 1.3^2) and mu~N(10,1.2^2). Observed data is (Y1, Y2, Y3, Y4) = (7.1, 8.9, 8.4, 8.6)
# Step 1: define a grid of 50 mu values on the narrowed range [7, 10].
# Using fewer points over a narrower window than part b illustrates how
# a coarser/narrower grid gives a rougher posterior approximation.
grid_data <- data.frame(mu_grid = seq(7, 10, length.out = 50))

# Step 2: evaluate the N(10, 1.2^2) prior and the joint Normal
# likelihood of the independent observations (7.1, 8.9, 8.4, 8.6),
# each with known sd = 1.3, at every mu
grid_data <- grid_data |>
  mutate(prior = dnorm(mu_grid, mean = 10, sd = 1.2)) |>
  mutate(likelihood = dnorm(7.1, mean = mu_grid, sd = 1.3) *
           dnorm(8.9, mean = mu_grid, sd = 1.3) *
           dnorm(8.4, mean = mu_grid, sd = 1.3) *
           dnorm(8.6, mean = mu_grid, sd = 1.3))

# Step 3: normalize prior x likelihood into the posterior approximation
grid_data <- grid_data |>
  mutate(unnormalized = likelihood * prior) |>
  mutate(posterior = unnormalized / sum(unnormalized))
# Lollipop plot of the 50-point grid approximation for mu
ggplot(grid_data, aes(x = mu_grid, y = posterior)) +
geom_point() +
geom_segment(aes(x = mu_grid, xend = mu_grid, y = 0, yend = posterior))
# Step 1: define a fine grid of 201 mu values on the wider range [5, 15]
grid_data <- data.frame(mu_grid = seq(5, 15, length.out = 201))

# Step 2: same N(10, 1.2^2) prior and joint Normal likelihood of
# (7.1, 8.9, 8.4, 8.6) with known sd = 1.3, on the finer grid
grid_data <- grid_data |>
  mutate(prior = dnorm(mu_grid, mean = 10, sd = 1.2)) |>
  mutate(likelihood = dnorm(7.1, mean = mu_grid, sd = 1.3) *
           dnorm(8.9, mean = mu_grid, sd = 1.3) *
           dnorm(8.4, mean = mu_grid, sd = 1.3) *
           dnorm(8.6, mean = mu_grid, sd = 1.3))

# Step 3: normalize into the posterior approximation
grid_data <- grid_data |>
  mutate(unnormalized = likelihood * prior) |>
  mutate(posterior = unnormalized / sum(unnormalized))
# Lollipop plot of the 201-point grid approximation for mu
ggplot(grid_data, aes(x = mu_grid, y = posterior)) +
geom_point() +
geom_segment(aes(x = mu_grid, xend = mu_grid, y = 0, yend = posterior))
A high dimensional Bayesian model could be useful for analyzing how well a medication works over time across different patients. The dimensions are time/age of patient and symptoms; there will be different symptoms across patients and across time.
The issue with dimensionality is that even for 2 dimensions, there could be an enormous number of approximations to make, because approximations would need to be calculated across both the x and y axes. If we had 100 values for the x-dimension and 100 for the y-dimension, that would be 10,000 approximations. This becomes time-consuming and inefficient to run as the number of dimensions and values increases.
The drawback for both grid approximations and MCMC are that they are discretized approximations. We should remember this when using these methods for continuous data.
The advantage of both MCMC and grid approximation are that they allow for approximations for posteriors that are difficult to solve by-hand.
An advantage that grid has over MCMC is that the code seems generally easier to write and can be done in R (without using Rstan and associated C++ issues).
An advantage of MCMC over grid approximation is the ability to estimate posteriors for multiple dimensions.
For something to be a Markov chain, event i is dependent only on i-1. There is memorylessness and otherwise independence for the event for i-2, i-3, and so on.
This scenario is not a Markov chain if I assume that the first several nights in a row involved me going to a Thai restaurant. The prior frequency of Thai restaurants (on days i-2, i-3, etc.) may influence my choice on day i.
This is also not a Markov chain because there is complete independence from day to day for winning the lottery. Markov chains need i to be dependent on i-1.
This is not a Markov chain for similar reasons to a. There is memory about i-2, i-3, etc., i.e. an experienced chess player does remember the prior moves of other players. If the players are not good and can only remember the last game they played, then this could be a Markov chain.
Use information to write RStan syntax.
# Beta-Binomial: Y ~ Bin(20, pi) with a flat Beta(1, 1) prior on pi.
# NOTE(review): Y is declared with upper = 12, which looks inconsistent
# with binomial(20, pi) (Y could be as large as 20) -- confirm intent.
bb_model <- " data { int<lower = 0, upper = 12> Y; } parameters { real<lower = 0, upper = 1> pi; } model { Y ~ binomial(20, pi); pi ~ beta(1, 1); } "
# Gamma-Poisson: three iid Poisson counts with a Gamma(4, 2) prior on lambda
gp_model <- " data { int<lower = 0> Y[3]; } parameters { real<lower = 0> lambda; } model { Y ~ poisson(lambda); lambda ~ gamma(4, 2); } "
# Normal-Normal: four observations with known sd 1 and an N(0, 10) prior on mu
normal_model <- " data { vector[4] Y; } parameters { real mu; } model { Y ~ normal(mu, 1); mu ~ normal(0, 10); } "
Write the Rstan syntax for simulating the posterior.
# Simulate the Beta-Binomial posterior: 4 chains, 2000 iterations each
# (the first half of each chain is warmup), with observed Y = 12
bb_sim <- stan(model_code = bb_model, data = list(Y = 12), chains = 4, iter = 1000*2, seed = 100) #chose 1000 to reduce processing time
# Gamma-Poisson posterior.
# NOTE(review): gp_model declares int Y[3] but only one count is
# supplied here; the data length may need to match -- confirm.
gp_sim <- stan(model_code = gp_model, data = list(Y = c(3)), chains = 4, iter = 1000*2, seed = 200)
# Data for the Normal-Normal model, supplied as a named list.
# Bug fix: the original put both statements on one line with no
# separator, which is a syntax error in R -- split onto two lines.
# NOTE(review): normal_model declares vector[4] Y but only one value is
# supplied; Stan will likely reject this -- confirm the intended data.
d <- list(Y = c(12.2))
nn_sim <- stan(model_code = normal_model, data = d,
               chains = 4, iter = 1000*2, seed = 300)
library(bayesrules)
# Step 1: define the Beta-Binomial model -- Y successes out of n = 10
# trials, with a Beta(3, 8) prior on the success probability pi
bb_model <- "
data {
int<lower = 0, upper = 10> Y;
}
parameters {
real<lower = 0, upper = 1> pi;
}
model {
Y ~ binomial(10, pi);
pi ~ beta(3, 8);
}
"
Step 2 is simulating the posterior.
# Simulate the posterior: 3 chains of 24,000 iterations each (the first
# 12,000 per chain are warmup and discarded), given the observed Y = 2
bb_sim <- stan(model_code = bb_model, data = list(Y = 2),
chains = 3, iter = 12000*2, seed = 400)
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/Rcpp/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/unsupported" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/src/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppParallel/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG -DBOOST_DISABLE_ASSERTS -DBOOST_PENDING_INTEGER_LOG2_HPP -DSTAN_THREADS -DBOOST_NO_AUTO_PTR -include '/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp' -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1 -I/opt/R/arm64/include -fPIC -falign-functions=64 -Wall -g -O2 -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
## ^
## ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
## ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
##
## SAMPLING FOR MODEL 'c071b3d8201f3c9ddeb3e3d5c2149cfc' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 1.1e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.11 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 24000 [ 0%] (Warmup)
## Chain 1: Iteration: 2400 / 24000 [ 10%] (Warmup)
## Chain 1: Iteration: 4800 / 24000 [ 20%] (Warmup)
## Chain 1: Iteration: 7200 / 24000 [ 30%] (Warmup)
## Chain 1: Iteration: 9600 / 24000 [ 40%] (Warmup)
## Chain 1: Iteration: 12000 / 24000 [ 50%] (Warmup)
## Chain 1: Iteration: 12001 / 24000 [ 50%] (Sampling)
## Chain 1: Iteration: 14400 / 24000 [ 60%] (Sampling)
## Chain 1: Iteration: 16800 / 24000 [ 70%] (Sampling)
## Chain 1: Iteration: 19200 / 24000 [ 80%] (Sampling)
## Chain 1: Iteration: 21600 / 24000 [ 90%] (Sampling)
## Chain 1: Iteration: 24000 / 24000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.04999 seconds (Warm-up)
## Chain 1: 0.052789 seconds (Sampling)
## Chain 1: 0.102779 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'c071b3d8201f3c9ddeb3e3d5c2149cfc' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 2e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.02 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 24000 [ 0%] (Warmup)
## Chain 2: Iteration: 2400 / 24000 [ 10%] (Warmup)
## Chain 2: Iteration: 4800 / 24000 [ 20%] (Warmup)
## Chain 2: Iteration: 7200 / 24000 [ 30%] (Warmup)
## Chain 2: Iteration: 9600 / 24000 [ 40%] (Warmup)
## Chain 2: Iteration: 12000 / 24000 [ 50%] (Warmup)
## Chain 2: Iteration: 12001 / 24000 [ 50%] (Sampling)
## Chain 2: Iteration: 14400 / 24000 [ 60%] (Sampling)
## Chain 2: Iteration: 16800 / 24000 [ 70%] (Sampling)
## Chain 2: Iteration: 19200 / 24000 [ 80%] (Sampling)
## Chain 2: Iteration: 21600 / 24000 [ 90%] (Sampling)
## Chain 2: Iteration: 24000 / 24000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.049339 seconds (Warm-up)
## Chain 2: 0.050729 seconds (Sampling)
## Chain 2: 0.100068 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'c071b3d8201f3c9ddeb3e3d5c2149cfc' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 3e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 24000 [ 0%] (Warmup)
## Chain 3: Iteration: 2400 / 24000 [ 10%] (Warmup)
## Chain 3: Iteration: 4800 / 24000 [ 20%] (Warmup)
## Chain 3: Iteration: 7200 / 24000 [ 30%] (Warmup)
## Chain 3: Iteration: 9600 / 24000 [ 40%] (Warmup)
## Chain 3: Iteration: 12000 / 24000 [ 50%] (Warmup)
## Chain 3: Iteration: 12001 / 24000 [ 50%] (Sampling)
## Chain 3: Iteration: 14400 / 24000 [ 60%] (Sampling)
## Chain 3: Iteration: 16800 / 24000 [ 70%] (Sampling)
## Chain 3: Iteration: 19200 / 24000 [ 80%] (Sampling)
## Chain 3: Iteration: 21600 / 24000 [ 90%] (Sampling)
## Chain 3: Iteration: 24000 / 24000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.04925 seconds (Warm-up)
## Chain 3: 0.051465 seconds (Sampling)
## Chain 3: 0.100715 seconds (Total)
## Chain 3:
# Trace plot of the pi values visited by each of the 3 chains
mcmc_trace(bb_sim, pars = "pi", size = 0.3)
The range of values on the x axis is 0-6000. It is not 12,000 because the first half of the iterations for each chain (set by the iter argument) are discarded as warmup.
Create density plot of values for 3 chains.
# Density plot of the combined posterior draws of pi
mcmc_dens(bb_sim, pars = "pi") +
yaxis_text(TRUE) +
ylab("density")
# Exact conjugate Beta-Binomial summary for comparison with the MCMC
# approximation: posterior is Beta(3 + 2, 8 + 10 - 2) = Beta(5, 16)
summarize_beta_binomial(alpha = 3, beta = 8, y = 2, n = 10)
## model alpha beta mean mode var sd
## 1 prior 3 8 0.2727273 0.2222222 0.016528926 0.12856487
## 2 posterior 5 16 0.2380952 0.2105263 0.008245723 0.09080596
# Plot the exact prior, (scaled) likelihood, and posterior together
plot_beta_binomial(alpha = 3, beta = 8, y = 2, n = 10)
The MCMC approximation is pretty good. It centers around 0.2 and the true posterior mean is 0.238.
Repeat the above for Beta(4,3) and Y = 4, n = 12.
#step 1: define the Beta-Binomial model with a Beta(4, 3) prior and
# n = 12 trials
bbb_model <- "
data {
int<lower = 0, upper = 12> Y;
}
parameters {
real<lower = 0, upper = 1> pi;
}
model {
Y ~ binomial(12, pi);
pi ~ beta(4, 3);
}
"
#step 2: simulate the posterior (3 chains; the first half of each
# chain's 24,000 iterations is discarded as warmup)
bbb_sim <- stan(model_code = bbb_model, data = list(Y = 4), #y = 4
chains = 3, iter = 12000*2, seed = 400)
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/Rcpp/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/unsupported" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/src/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppParallel/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG -DBOOST_DISABLE_ASSERTS -DBOOST_PENDING_INTEGER_LOG2_HPP -DSTAN_THREADS -DBOOST_NO_AUTO_PTR -include '/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp' -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1 -I/opt/R/arm64/include -fPIC -falign-functions=64 -Wall -g -O2 -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
## ^
## ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
## ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
##
## SAMPLING FOR MODEL '1b51acab522f7c612890772a1d696e4d' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 0.000164 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 1.64 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 24000 [ 0%] (Warmup)
## Chain 1: Iteration: 2400 / 24000 [ 10%] (Warmup)
## Chain 1: Iteration: 4800 / 24000 [ 20%] (Warmup)
## Chain 1: Iteration: 7200 / 24000 [ 30%] (Warmup)
## Chain 1: Iteration: 9600 / 24000 [ 40%] (Warmup)
## Chain 1: Iteration: 12000 / 24000 [ 50%] (Warmup)
## Chain 1: Iteration: 12001 / 24000 [ 50%] (Sampling)
## Chain 1: Iteration: 14400 / 24000 [ 60%] (Sampling)
## Chain 1: Iteration: 16800 / 24000 [ 70%] (Sampling)
## Chain 1: Iteration: 19200 / 24000 [ 80%] (Sampling)
## Chain 1: Iteration: 21600 / 24000 [ 90%] (Sampling)
## Chain 1: Iteration: 24000 / 24000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.050971 seconds (Warm-up)
## Chain 1: 0.054054 seconds (Sampling)
## Chain 1: 0.105025 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL '1b51acab522f7c612890772a1d696e4d' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 3e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 24000 [ 0%] (Warmup)
## Chain 2: Iteration: 2400 / 24000 [ 10%] (Warmup)
## Chain 2: Iteration: 4800 / 24000 [ 20%] (Warmup)
## Chain 2: Iteration: 7200 / 24000 [ 30%] (Warmup)
## Chain 2: Iteration: 9600 / 24000 [ 40%] (Warmup)
## Chain 2: Iteration: 12000 / 24000 [ 50%] (Warmup)
## Chain 2: Iteration: 12001 / 24000 [ 50%] (Sampling)
## Chain 2: Iteration: 14400 / 24000 [ 60%] (Sampling)
## Chain 2: Iteration: 16800 / 24000 [ 70%] (Sampling)
## Chain 2: Iteration: 19200 / 24000 [ 80%] (Sampling)
## Chain 2: Iteration: 21600 / 24000 [ 90%] (Sampling)
## Chain 2: Iteration: 24000 / 24000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.049153 seconds (Warm-up)
## Chain 2: 0.051943 seconds (Sampling)
## Chain 2: 0.101096 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL '1b51acab522f7c612890772a1d696e4d' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 3e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 24000 [ 0%] (Warmup)
## Chain 3: Iteration: 2400 / 24000 [ 10%] (Warmup)
## Chain 3: Iteration: 4800 / 24000 [ 20%] (Warmup)
## Chain 3: Iteration: 7200 / 24000 [ 30%] (Warmup)
## Chain 3: Iteration: 9600 / 24000 [ 40%] (Warmup)
## Chain 3: Iteration: 12000 / 24000 [ 50%] (Warmup)
## Chain 3: Iteration: 12001 / 24000 [ 50%] (Sampling)
## Chain 3: Iteration: 14400 / 24000 [ 60%] (Sampling)
## Chain 3: Iteration: 16800 / 24000 [ 70%] (Sampling)
## Chain 3: Iteration: 19200 / 24000 [ 80%] (Sampling)
## Chain 3: Iteration: 21600 / 24000 [ 90%] (Sampling)
## Chain 3: Iteration: 24000 / 24000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.053457 seconds (Warm-up)
## Chain 3: 0.06263 seconds (Sampling)
## Chain 3: 0.116087 seconds (Total)
## Chain 3:
#step 3:
mcmc_trace(bbb_sim, pars = "pi", size = 0.3)
#density plot
mcmc_dens(bbb_sim, pars = "pi") +
yaxis_text(TRUE) +
ylab("density")
# Exact (conjugate) posterior for comparison: a Beta(4, 3) prior updated
# with y = 4 successes in n = 12 trials yields a Beta(8, 11) posterior.
summarize_beta_binomial(alpha = 4, beta = 3, y = 4, n = 12)
## model alpha beta mean mode var sd
## 1 prior 4 3 0.5714286 0.6000000 0.03061224 0.1749636
## 2 posterior 8 11 0.4210526 0.4117647 0.01218837 0.1104009
# Plot the prior, (scaled) likelihood, and exact posterior together
plot_beta_binomial(alpha = 4, beta = 3, y = 4, n = 12)
This is another good MCMC approximation: the simulated posterior is centered around 0.4, close to the true posterior mean of 0.42.
a & b
# Step 1: Define the Gamma-Poisson model in Stan.
# Data: three Poisson counts Y. Parameter: rate lambda > 0.
# Likelihood: Y[i] ~ Poisson(lambda); prior: lambda ~ Gamma(20, 5).
# (Comments inside the Stan string would need //, not #, so all
# annotation is kept out here in the R code.)
gp_model <- "
data {
int<lower = 0> Y[3];
}
parameters {
real<lower = 0> lambda;
}
model {
Y ~ poisson(lambda);
lambda ~ gamma(20, 5);
}
"
# Step 2: simulate the posterior with 4 parallel chains of
# 20,000 iterations each (the first half is discarded as warmup)
gp_sim <- stan(
  model_code = gp_model,
  data = list(Y = c(0, 1, 0)),
  chains = 4,
  iter = 20000,
  seed = 500
)
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/Rcpp/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/unsupported" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/src/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppParallel/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG -DBOOST_DISABLE_ASSERTS -DBOOST_PENDING_INTEGER_LOG2_HPP -DSTAN_THREADS -DBOOST_NO_AUTO_PTR -include '/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp' -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1 -I/opt/R/arm64/include -fPIC -falign-functions=64 -Wall -g -O2 -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
## ^
## ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
## ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
##
## SAMPLING FOR MODEL 'f65cbc91cb99a701a7ac1a425b95cf50' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 9e-06 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.09 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 1: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 1: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 1: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 1: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 1: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 1: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 1: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 1: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 1: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 1: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 1: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.042444 seconds (Warm-up)
## Chain 1: 0.042697 seconds (Sampling)
## Chain 1: 0.085141 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'f65cbc91cb99a701a7ac1a425b95cf50' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 3e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 2: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 2: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 2: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 2: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 2: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 2: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 2: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 2: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 2: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 2: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 2: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.044736 seconds (Warm-up)
## Chain 2: 0.045654 seconds (Sampling)
## Chain 2: 0.09039 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'f65cbc91cb99a701a7ac1a425b95cf50' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 4e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.04 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 3: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 3: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 3: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 3: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 3: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 3: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 3: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 3: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 3: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 3: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 3: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.041986 seconds (Warm-up)
## Chain 3: 0.049917 seconds (Sampling)
## Chain 3: 0.091903 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'f65cbc91cb99a701a7ac1a425b95cf50' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 2e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.02 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 4: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 4: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 4: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 4: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 4: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 4: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 4: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 4: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 4: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 4: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 4: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.042867 seconds (Warm-up)
## Chain 4: 0.048631 seconds (Sampling)
## Chain 4: 0.091498 seconds (Total)
## Chain 4:
# MCMC diagnostics for lambda: trace plot (mixing) and density plot
# (posterior approximation from the combined chains)
mcmc_trace(x = gp_sim, pars = "lambda", size = 0.3)

mcmc_dens(x = gp_sim, pars = "lambda") +
  yaxis_text(on = TRUE) +
  ylab(label = "density")
From the density plot, the most plausible posterior value of lambda is about 2.5.
# Exact (conjugate) posterior for comparison: a Gamma(20, 5) prior updated
# with sum(Y) = 1 event over n = 3 observations yields Gamma(21, 8).
summarize_gamma_poisson(shape = 20, rate = 5, sum_y = 1, n = 3)
## model shape rate mean mode var sd
## 1 prior 20 5 4.000 3.8 0.800000 0.8944272
## 2 posterior 21 8 2.625 2.5 0.328125 0.5728220
# Plot the prior, (scaled) likelihood, and exact posterior together
plot_gamma_poisson(shape = 20, rate = 5, sum_y = 1, n = 3)
This was a good MCMC approximation: the simulation is centered around 2.5, which matches the true posterior mode (2.5), with the true posterior mean at 2.625.
# Step 1: Define a second Gamma-Poisson model in Stan, identical to
# gp_model except for a less informative Gamma(5, 5) prior on lambda.
# Data: three Poisson counts Y. Parameter: rate lambda > 0.
gpp_model <- "
data {
int<lower = 0> Y[3];
}
parameters {
real<lower = 0> lambda;
}
model {
Y ~ poisson(lambda);
lambda ~ gamma(5, 5);
}
"
# Step 2: simulate the posterior under the Gamma(5, 5) prior —
# same data, chains, iterations, and seed as gp_sim for comparability
gpp_sim <- stan(
  model_code = gpp_model,
  data = list(Y = c(0, 1, 0)),
  chains = 4,
  iter = 20000,
  seed = 500
)
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/Rcpp/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/unsupported" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/src/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppParallel/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG -DBOOST_DISABLE_ASSERTS -DBOOST_PENDING_INTEGER_LOG2_HPP -DSTAN_THREADS -DBOOST_NO_AUTO_PTR -include '/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp' -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1 -I/opt/R/arm64/include -fPIC -falign-functions=64 -Wall -g -O2 -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
## ^
## ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
## ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
##
## SAMPLING FOR MODEL 'd6d79c0ae95ef30a3463fe7b9e36388e' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 8e-06 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.08 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 1: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 1: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 1: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 1: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 1: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 1: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 1: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 1: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 1: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 1: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 1: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.041611 seconds (Warm-up)
## Chain 1: 0.047012 seconds (Sampling)
## Chain 1: 0.088623 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'd6d79c0ae95ef30a3463fe7b9e36388e' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 2e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.02 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 2: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 2: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 2: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 2: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 2: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 2: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 2: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 2: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 2: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 2: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 2: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.043167 seconds (Warm-up)
## Chain 2: 0.048641 seconds (Sampling)
## Chain 2: 0.091808 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'd6d79c0ae95ef30a3463fe7b9e36388e' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 1e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.01 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 3: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 3: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 3: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 3: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 3: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 3: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 3: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 3: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 3: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 3: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 3: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.041419 seconds (Warm-up)
## Chain 3: 0.043228 seconds (Sampling)
## Chain 3: 0.084647 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'd6d79c0ae95ef30a3463fe7b9e36388e' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 3e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.03 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 20000 [ 0%] (Warmup)
## Chain 4: Iteration: 2000 / 20000 [ 10%] (Warmup)
## Chain 4: Iteration: 4000 / 20000 [ 20%] (Warmup)
## Chain 4: Iteration: 6000 / 20000 [ 30%] (Warmup)
## Chain 4: Iteration: 8000 / 20000 [ 40%] (Warmup)
## Chain 4: Iteration: 10000 / 20000 [ 50%] (Warmup)
## Chain 4: Iteration: 10001 / 20000 [ 50%] (Sampling)
## Chain 4: Iteration: 12000 / 20000 [ 60%] (Sampling)
## Chain 4: Iteration: 14000 / 20000 [ 70%] (Sampling)
## Chain 4: Iteration: 16000 / 20000 [ 80%] (Sampling)
## Chain 4: Iteration: 18000 / 20000 [ 90%] (Sampling)
## Chain 4: Iteration: 20000 / 20000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.04331 seconds (Warm-up)
## Chain 4: 0.049339 seconds (Sampling)
## Chain 4: 0.092649 seconds (Total)
## Chain 4:
# MCMC diagnostics for lambda under the Gamma(5, 5) prior:
# trace plot for mixing, density plot for the posterior approximation
mcmc_trace(x = gpp_sim, pars = "lambda", size = 0.3)

mcmc_dens(x = gpp_sim, pars = "lambda") +
  yaxis_text(on = TRUE) +
  ylab(label = "density")
# Exact (conjugate) posterior for comparison: a Gamma(5, 5) prior updated
# with sum(Y) = 1 event over n = 3 observations yields Gamma(6, 8).
summarize_gamma_poisson(shape = 5, rate = 5, sum_y = 1, n = 3)
## model shape rate mean mode var sd
## 1 prior 5 5 1.00 0.800 0.20000 0.4472136
## 2 posterior 6 8 0.75 0.625 0.09375 0.3061862
# Plot the prior, (scaled) likelihood, and exact posterior together
plot_gamma_poisson(shape = 5, rate = 5, sum_y = 1, n = 3)
The approximation centers around 0.6; note that the density peak estimates the posterior mode (0.625), so this is consistent, while the true posterior mean (0.75) sits slightly to the right of the peak because the posterior is right-skewed.
# Define a Normal-Normal model in Stan.
# Data: vector Y of 4 observations with known sd = 1.3.
# Parameter: unknown mean mu with a Normal(10, 1.2) prior.
normal_model <- "
data {
vector[4] Y;
}
parameters {
real mu;
}
model {
Y ~ normal(mu, 1.3);
mu ~ normal(10, 1.2);
}
"
# Observed data: four measurements
d <- list(Y = c(7.1, 8.9, 8.4, 8.6))

# Simulate the posterior of mu: 4 chains, 2,000 iterations each
# (first 1,000 per chain are warmup)
nn_sim <- stan(
  model_code = normal_model,
  data = d,
  chains = 4,
  iter = 2000,
  seed = 600
)
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/Rcpp/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/unsupported" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/src/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppParallel/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG -DBOOST_DISABLE_ASSERTS -DBOOST_PENDING_INTEGER_LOG2_HPP -DSTAN_THREADS -DBOOST_NO_AUTO_PTR -include '/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp' -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1 -I/opt/R/arm64/include -fPIC -falign-functions=64 -Wall -g -O2 -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
## ^
## ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
## ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
##
## SAMPLING FOR MODEL 'aafc09a58798752b5f677b912ca25e71' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 2.6e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.26 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.00513 seconds (Warm-up)
## Chain 1: 0.005921 seconds (Sampling)
## Chain 1: 0.011051 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'aafc09a58798752b5f677b912ca25e71' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 2e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.02 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.005194 seconds (Warm-up)
## Chain 2: 0.005099 seconds (Sampling)
## Chain 2: 0.010293 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'aafc09a58798752b5f677b912ca25e71' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 2e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.02 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.005105 seconds (Warm-up)
## Chain 3: 0.004785 seconds (Sampling)
## Chain 3: 0.00989 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'aafc09a58798752b5f677b912ca25e71' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 1e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.01 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.005101 seconds (Warm-up)
## Chain 4: 0.00518 seconds (Sampling)
## Chain 4: 0.010281 seconds (Total)
## Chain 4:
# MCMC diagnostics for mu: trace plot (mixing) and density plot
# (posterior approximation from the combined chains)
mcmc_trace(x = nn_sim, pars = "mu", size = 0.3)

mcmc_dens(x = nn_sim, pars = "mu") +
  yaxis_text(on = TRUE) +
  ylab(label = "density")
# Define a second Normal-Normal model in Stan.
# Data: vector Y of 5 observations with known sd = 8.
# Parameter: unknown mean mu with a Normal(-14, 2) prior.
nnormal_model <- "
data {
vector[5] Y;
}
parameters {
real mu;
}
model {
Y ~ normal(mu, 8);
mu ~ normal(-14, 2);
}
"
# Observed data: five measurements
dd <- list(Y = c(-10.1, 5.5, 0.1, -1.4, 11.5))

# Simulate the posterior of mu: 4 chains, 2,000 iterations each
# (first 1,000 per chain are warmup), same seed as nn_sim
nnn_sim <- stan(
  model_code = nnormal_model,
  data = dd,
  chains = 4,
  iter = 2000,
  seed = 600
)
## Trying to compile a simple C file
## Running /Library/Frameworks/R.framework/Resources/bin/R CMD SHLIB foo.c
## clang -arch arm64 -I"/Library/Frameworks/R.framework/Resources/include" -DNDEBUG -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/Rcpp/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/unsupported" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/BH/include" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/src/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppParallel/include/" -I"/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/rstan/include" -DEIGEN_NO_DEBUG -DBOOST_DISABLE_ASSERTS -DBOOST_PENDING_INTEGER_LOG2_HPP -DSTAN_THREADS -DBOOST_NO_AUTO_PTR -include '/Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp' -D_REENTRANT -DRCPP_PARALLEL_USE_TBB=1 -I/opt/R/arm64/include -fPIC -falign-functions=64 -Wall -g -O2 -c foo.c -o foo.o
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:88:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:1: error: unknown type name 'namespace'
## namespace Eigen {
## ^
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/src/Core/util/Macros.h:628:16: error: expected ';' after top level declarator
## namespace Eigen {
## ^
## ;
## In file included from <built-in>:1:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/StanHeaders/include/stan/math/prim/mat/fun/Eigen.hpp:13:
## In file included from /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Dense:1:
## /Library/Frameworks/R.framework/Versions/4.1-arm64/Resources/library/RcppEigen/include/Eigen/Core:96:10: fatal error: 'complex' file not found
## #include <complex>
## ^~~~~~~~~
## 3 errors generated.
## make: *** [foo.o] Error 1
##
## SAMPLING FOR MODEL 'cdfc0c053324bc0fce964d8c0a25d259' NOW (CHAIN 1).
## Chain 1:
## Chain 1: Gradient evaluation took 1.8e-05 seconds
## Chain 1: 1000 transitions using 10 leapfrog steps per transition would take 0.18 seconds.
## Chain 1: Adjust your expectations accordingly!
## Chain 1:
## Chain 1:
## Chain 1: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 1: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 1: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 1: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 1: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 1: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 1: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 1: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 1: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 1: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 1: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 1: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 1:
## Chain 1: Elapsed Time: 0.005279 seconds (Warm-up)
## Chain 1: 0.005185 seconds (Sampling)
## Chain 1: 0.010464 seconds (Total)
## Chain 1:
##
## SAMPLING FOR MODEL 'cdfc0c053324bc0fce964d8c0a25d259' NOW (CHAIN 2).
## Chain 2:
## Chain 2: Gradient evaluation took 1e-06 seconds
## Chain 2: 1000 transitions using 10 leapfrog steps per transition would take 0.01 seconds.
## Chain 2: Adjust your expectations accordingly!
## Chain 2:
## Chain 2:
## Chain 2: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 2: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 2: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 2: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 2: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 2: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 2: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 2: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 2: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 2: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 2: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 2: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 2:
## Chain 2: Elapsed Time: 0.005406 seconds (Warm-up)
## Chain 2: 0.004812 seconds (Sampling)
## Chain 2: 0.010218 seconds (Total)
## Chain 2:
##
## SAMPLING FOR MODEL 'cdfc0c053324bc0fce964d8c0a25d259' NOW (CHAIN 3).
## Chain 3:
## Chain 3: Gradient evaluation took 2e-06 seconds
## Chain 3: 1000 transitions using 10 leapfrog steps per transition would take 0.02 seconds.
## Chain 3: Adjust your expectations accordingly!
## Chain 3:
## Chain 3:
## Chain 3: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 3: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 3: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 3: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 3: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 3: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 3: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 3: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 3: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 3: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 3: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 3: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 3:
## Chain 3: Elapsed Time: 0.005202 seconds (Warm-up)
## Chain 3: 0.004951 seconds (Sampling)
## Chain 3: 0.010153 seconds (Total)
## Chain 3:
##
## SAMPLING FOR MODEL 'cdfc0c053324bc0fce964d8c0a25d259' NOW (CHAIN 4).
## Chain 4:
## Chain 4: Gradient evaluation took 1e-06 seconds
## Chain 4: 1000 transitions using 10 leapfrog steps per transition would take 0.01 seconds.
## Chain 4: Adjust your expectations accordingly!
## Chain 4:
## Chain 4:
## Chain 4: Iteration: 1 / 2000 [ 0%] (Warmup)
## Chain 4: Iteration: 200 / 2000 [ 10%] (Warmup)
## Chain 4: Iteration: 400 / 2000 [ 20%] (Warmup)
## Chain 4: Iteration: 600 / 2000 [ 30%] (Warmup)
## Chain 4: Iteration: 800 / 2000 [ 40%] (Warmup)
## Chain 4: Iteration: 1000 / 2000 [ 50%] (Warmup)
## Chain 4: Iteration: 1001 / 2000 [ 50%] (Sampling)
## Chain 4: Iteration: 1200 / 2000 [ 60%] (Sampling)
## Chain 4: Iteration: 1400 / 2000 [ 70%] (Sampling)
## Chain 4: Iteration: 1600 / 2000 [ 80%] (Sampling)
## Chain 4: Iteration: 1800 / 2000 [ 90%] (Sampling)
## Chain 4: Iteration: 2000 / 2000 [100%] (Sampling)
## Chain 4:
## Chain 4: Elapsed Time: 0.00503 seconds (Warm-up)
## Chain 4: 0.004766 seconds (Sampling)
## Chain 4: 0.009796 seconds (Total)
## Chain 4:
# MCMC diagnostics for mu in the second Normal-Normal model:
# trace plot for mixing, density plot for the posterior approximation
mcmc_trace(x = nnn_sim, pars = "mu", size = 0.3)

mcmc_dens(x = nnn_sim, pars = "mu") +
  yaxis_text(on = TRUE) +
  ylab(label = "density")
I noticed that while using Stan syntax in R code chunks I was able to generate plots and run iterations, but I always got this error: ‘config’ variable ‘CPP’ is defunct Warning in system(paste(CPP, ARGS), ignore.stdout = TRUE, ignore.stderr = TRUE) : error in running command
We could discuss this error next week and whether it is a serious issue. I also noticed an occasional warning saying not to use # for comments inside Stan code but rather //; this also did not seem to stop anything from running.